v->arch.perdomain_ptes = perdomain_ptes(d, v);
- if ( (rc = xstate_alloc_save_area(v)) != 0 )
+ if ( (rc = vcpu_init_fpu(v)) != 0 )
return rc;
- if ( v->arch.xsave_area )
- v->arch.fpu_ctxt = &v->arch.xsave_area->fpu_sse;
- else if ( !is_idle_domain(d) )
- {
- v->arch.fpu_ctxt = _xmalloc(sizeof(v->arch.xsave_area->fpu_sse), 16);
- if ( !v->arch.fpu_ctxt )
- {
- rc = -ENOMEM;
- goto done;
- }
- memset(v->arch.fpu_ctxt, 0, sizeof(v->arch.xsave_area->fpu_sse));
- }
if ( is_hvm_domain(d) )
{
done:
if ( rc )
{
- if ( v->arch.xsave_area )
- xstate_free_save_area(v);
- else
- xfree(v->arch.fpu_ctxt);
+ vcpu_destroy_fpu(v);
+
if ( !is_hvm_domain(d) && standalone_trap_ctxt(v) )
free_xenheap_page(v->arch.pv_vcpu.trap_ctxt);
}
if ( is_pv_32on64_vcpu(v) )
release_compat_l4(v);
- if ( v->arch.xsave_area )
- xstate_free_save_area(v);
- else
- xfree(v->arch.fpu_ctxt);
+ vcpu_destroy_fpu(v);
if ( is_hvm_vcpu(v) )
hvm_vcpu_destroy(v);
}
}
+/*******************************/
+/* VCPU FPU Functions */
+/*******************************/
+/* Allocate and zero the FPU context save area for @v; returns 0 or -errno. */
+int vcpu_init_fpu(struct vcpu *v)
+{
+    int rc = 0;
+
+    /* The idle domain never uses the FPU; leave fpu_ctxt unallocated. */
+    if ( is_idle_vcpu(v) )
+        goto done;
+
+    if ( (rc = xstate_alloc_save_area(v)) != 0 )
+        return rc;
+
+    if ( v->arch.xsave_area )
+        v->arch.fpu_ctxt = &v->arch.xsave_area->fpu_sse;
+    else
+    {
+        /* sizeof() is unevaluated, so the NULL xsave_area deref is safe. */
+        v->arch.fpu_ctxt = _xmalloc(sizeof(v->arch.xsave_area->fpu_sse), 16);
+        if ( !v->arch.fpu_ctxt )
+        {
+            rc = -ENOMEM;
+            goto done;
+        }
+        memset(v->arch.fpu_ctxt, 0, sizeof(v->arch.xsave_area->fpu_sse));
+    }
+
+done:
+    return rc;
+}
+
+/* Release whichever FPU save area vcpu_init_fpu() allocated for @v. */
+void vcpu_destroy_fpu(struct vcpu *v)
+{
+    if ( v->arch.xsave_area )
+        xstate_free_save_area(v);
+    else
+        xfree(v->arch.fpu_ctxt);
+}
+
/*
* Local variables:
* mode: C
void setup_fpu(struct vcpu *v);
void save_init_fpu(struct vcpu *v);
+int vcpu_init_fpu(struct vcpu *v);
+void vcpu_destroy_fpu(struct vcpu *v);
#endif /* __ASM_I386_I387_H */